Classifying Kaggle Digits using Convolutional MLP

Based on Theano's Deep Learning Tutorial (Release 0.1), run on a g2.2xlarge instance on Amazon Web Services in August 2015 by George Fisher.


In [1]:
dataset='kaggle.pkl.gz'

In [2]:
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """
        # start-snippet-1
        # initialize the weights W as a zero matrix of shape (n_in, n_out)
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )

        # symbolic expression for computing the matrix of class-membership
        # probabilities
        # Where:
        # W is a matrix where column-k represents the separation hyperplane for
        # class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represents the free parameter of
        # hyperplane-k
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
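        # (shapes, for this notebook's setup: input is (batch_size, n_in),
        # so p_y_given_x is (batch_size, n_out) -- one row of 10 digit-class
        # probabilities per example)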

        # symbolic description of how to compute prediction as class whose
        # probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1

        # parameters of the model
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input

    def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \ell(\theta=\{W,b\}, \mathcal{D}) =
            -\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|-1}
                \log P(Y=y^{(i)} | x^{(i)}, W, b)

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # the number of examples (call it n) in the minibatch.
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0, 1, 2, ..., n-1]. T.log(self.p_y_given_x) is a matrix of
        # log-probabilities (call it LP) with one row per example and
        # one column per class. LP[T.arange(y.shape[0]), y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]], and T.mean(LP[T.arange(y.shape[0]), y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
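        # A tiny concrete illustration (a sketch, not executed here): with
        # n = 3 and y = [1, 0, 2], LP[T.arange(3), y] picks out
        # [LP[0,1], LP[1,0], LP[2,2]].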
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        # end-snippet-2

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
            
    def prediction(self):
        return self.y_pred

In [3]:
"""This tutorial introduces the LeNet5 neural network architecture
using Theano.  LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.


This implementation simplifies the model in the following ways:

 - LeNetConvPool doesn't implement location-specific gain and bias parameters
 - LeNetConvPool doesn't implement pooling by average; it implements pooling
   by max.
 - Digit classification is implemented with a logistic regression rather than
   an RBF network
 - The convolutions at the second layer are fully connected, whereas LeNet5
   used a sparse connection table there

References:
 - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
   Gradient-Based Learning Applied to Document
   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

"""
import os
import sys
import timeit
import cPickle

import numpy

import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv

from logistic_sgd import load_data  # LogisticRegression is defined above
from mlp import HiddenLayer


class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
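        # e.g., for the first layer constructed below: filter_shape =
        # (20, 1, 5, 5) and poolsize = (2, 2) give fan_in = 1*5*5 = 25,
        # fan_out = 20*5*5/4 = 125, and W_bound = sqrt(6/150) = 0.2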
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input


def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (28, 28) is the size of MNIST images.
    layer0_input = x.reshape((batch_size, 1, 28, 28))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )

    # the HiddenLayer is fully connected, so it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e., a matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 4 * 4,
        n_out=500,
        activation=T.tanh
    )

    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
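
    # test_pred returns the model's predicted labels for a test minibatch;
    # y appears in givens but is unused by layer3.prediction(), hence
    # on_unused_input='ignore'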

    test_pred = theano.function(
        [index],
        layer3.prediction(),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        },
        on_unused_input='ignore'
    )
    
    
    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
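                                  # (in the run below: n_train_batches = 139,
                                  # patience/2 = 5000, so validation happens
                                  # every 139 minibatches -- once per epoch)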

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    
                    test_preds = [
                        test_pred(i)
                        for i in xrange(n_test_batches)
                    ]
                    
                    # save the current best test-set predictions
                    with open('best_cnn_pred_model.pkl', 'wb') as f:
                        cPickle.dump(test_preds, f)
                    
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(dataset)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

# if __name__ == '__main__':
#     evaluate_lenet5()


def experiment(state, channel):
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)


Couldn't import dot_parser, loading of dot files will not be possible.
Using gpu device 0: GRID K520 (CNMeM is disabled)

In [4]:
evaluate_lenet5(dataset=dataset)


... loading data
... building the model
... training
training @ iter =  0
training @ iter =  100
epoch 1, minibatch 139/139, validation error 8.114286 %
     epoch 1, minibatch 139/139, test error of best model 89.946429 %
training @ iter =  200
epoch 2, minibatch 139/139, validation error 5.142857 %
     epoch 2, minibatch 139/139, test error of best model 90.050000 %
training @ iter =  300
training @ iter =  400
epoch 3, minibatch 139/139, validation error 3.821429 %
     epoch 3, minibatch 139/139, test error of best model 90.042857 %
training @ iter =  500
epoch 4, minibatch 139/139, validation error 3.092857 %
     epoch 4, minibatch 139/139, test error of best model 90.028571 %
training @ iter =  600
epoch 5, minibatch 139/139, validation error 2.564286 %
     epoch 5, minibatch 139/139, test error of best model 90.021429 %
training @ iter =  700
training @ iter =  800
epoch 6, minibatch 139/139, validation error 2.292857 %
     epoch 6, minibatch 139/139, test error of best model 90.010714 %
training @ iter =  900
epoch 7, minibatch 139/139, validation error 1.950000 %
     epoch 7, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  1000
training @ iter =  1100
epoch 8, minibatch 139/139, validation error 1.764286 %
     epoch 8, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  1200
epoch 9, minibatch 139/139, validation error 1.614286 %
     epoch 9, minibatch 139/139, test error of best model 90.007143 %
training @ iter =  1300
epoch 10, minibatch 139/139, validation error 1.514286 %
     epoch 10, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  1400
training @ iter =  1500
epoch 11, minibatch 139/139, validation error 1.421429 %
     epoch 11, minibatch 139/139, test error of best model 89.989286 %
training @ iter =  1600
epoch 12, minibatch 139/139, validation error 1.314286 %
     epoch 12, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  1700
training @ iter =  1800
epoch 13, minibatch 139/139, validation error 1.278571 %
     epoch 13, minibatch 139/139, test error of best model 89.992857 %
training @ iter =  1900
epoch 14, minibatch 139/139, validation error 1.214286 %
     epoch 14, minibatch 139/139, test error of best model 89.992857 %
training @ iter =  2000
epoch 15, minibatch 139/139, validation error 1.185714 %
     epoch 15, minibatch 139/139, test error of best model 89.992857 %
training @ iter =  2100
training @ iter =  2200
epoch 16, minibatch 139/139, validation error 1.142857 %
     epoch 16, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  2300
epoch 17, minibatch 139/139, validation error 1.078571 %
     epoch 17, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  2400
training @ iter =  2500
epoch 18, minibatch 139/139, validation error 1.014286 %
     epoch 18, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  2600
epoch 19, minibatch 139/139, validation error 1.000000 %
     epoch 19, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  2700
epoch 20, minibatch 139/139, validation error 0.971429 %
     epoch 20, minibatch 139/139, test error of best model 90.003571 %
training @ iter =  2800
training @ iter =  2900
epoch 21, minibatch 139/139, validation error 0.957143 %
     epoch 21, minibatch 139/139, test error of best model 90.003571 %
training @ iter =  3000
epoch 22, minibatch 139/139, validation error 0.921429 %
     epoch 22, minibatch 139/139, test error of best model 90.003571 %
training @ iter =  3100
epoch 23, minibatch 139/139, validation error 0.907143 %
     epoch 23, minibatch 139/139, test error of best model 90.003571 %
training @ iter =  3200
training @ iter =  3300
epoch 24, minibatch 139/139, validation error 0.900000 %
     epoch 24, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  3400
epoch 25, minibatch 139/139, validation error 0.892857 %
     epoch 25, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  3500
training @ iter =  3600
epoch 26, minibatch 139/139, validation error 0.878571 %
     epoch 26, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  3700
epoch 27, minibatch 139/139, validation error 0.850000 %
     epoch 27, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  3800
epoch 28, minibatch 139/139, validation error 0.828571 %
     epoch 28, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  3900
training @ iter =  4000
epoch 29, minibatch 139/139, validation error 0.821429 %
     epoch 29, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  4100
epoch 30, minibatch 139/139, validation error 0.807143 %
     epoch 30, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  4200
training @ iter =  4300
epoch 31, minibatch 139/139, validation error 0.785714 %
     epoch 31, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  4400
epoch 32, minibatch 139/139, validation error 0.778571 %
     epoch 32, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  4500
epoch 33, minibatch 139/139, validation error 0.764286 %
     epoch 33, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  4600
training @ iter =  4700
epoch 34, minibatch 139/139, validation error 0.750000 %
     epoch 34, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  4800
epoch 35, minibatch 139/139, validation error 0.735714 %
     epoch 35, minibatch 139/139, test error of best model 90.000000 %
training @ iter =  4900
training @ iter =  5000
epoch 36, minibatch 139/139, validation error 0.742857 %
training @ iter =  5100
epoch 37, minibatch 139/139, validation error 0.735714 %
training @ iter =  5200
epoch 38, minibatch 139/139, validation error 0.735714 %
training @ iter =  5300
training @ iter =  5400
epoch 39, minibatch 139/139, validation error 0.721429 %
     epoch 39, minibatch 139/139, test error of best model 89.992857 %
training @ iter =  5500
epoch 40, minibatch 139/139, validation error 0.707143 %
     epoch 40, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  5600
epoch 41, minibatch 139/139, validation error 0.707143 %
training @ iter =  5700
training @ iter =  5800
epoch 42, minibatch 139/139, validation error 0.700000 %
     epoch 42, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  5900
epoch 43, minibatch 139/139, validation error 0.685714 %
     epoch 43, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  6000
training @ iter =  6100
epoch 44, minibatch 139/139, validation error 0.685714 %
training @ iter =  6200
epoch 45, minibatch 139/139, validation error 0.678571 %
     epoch 45, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  6300
epoch 46, minibatch 139/139, validation error 0.657143 %
     epoch 46, minibatch 139/139, test error of best model 89.996429 %
training @ iter =  6400
training @ iter =  6500
epoch 47, minibatch 139/139, validation error 0.657143 %
training @ iter =  6600
epoch 48, minibatch 139/139, validation error 0.657143 %
training @ iter =  6700
training @ iter =  6800
epoch 49, minibatch 139/139, validation error 0.657143 %
training @ iter =  6900
epoch 50, minibatch 139/139, validation error 0.650000 %
     epoch 50, minibatch 139/139, test error of best model 89.992857 %
training @ iter =  7000
epoch 51, minibatch 139/139, validation error 0.642857 %
     epoch 51, minibatch 139/139, test error of best model 89.989286 %
training @ iter =  7100
training @ iter =  7200
epoch 52, minibatch 139/139, validation error 0.628571 %
     epoch 52, minibatch 139/139, test error of best model 89.989286 %
training @ iter =  7300
epoch 53, minibatch 139/139, validation error 0.635714 %
training @ iter =  7400
training @ iter =  7500
epoch 54, minibatch 139/139, validation error 0.635714 %
training @ iter =  7600
epoch 55, minibatch 139/139, validation error 0.650000 %
training @ iter =  7700
epoch 56, minibatch 139/139, validation error 0.628571 %
training @ iter =  7800
training @ iter =  7900
epoch 57, minibatch 139/139, validation error 0.628571 %
training @ iter =  8000
epoch 58, minibatch 139/139, validation error 0.628571 %
training @ iter =  8100
training @ iter =  8200
epoch 59, minibatch 139/139, validation error 0.628571 %
training @ iter =  8300
epoch 60, minibatch 139/139, validation error 0.628571 %
training @ iter =  8400
epoch 61, minibatch 139/139, validation error 0.621429 %
     epoch 61, minibatch 139/139, test error of best model 89.978571 %
training @ iter =  8500
training @ iter =  8600
epoch 62, minibatch 139/139, validation error 0.635714 %
training @ iter =  8700
epoch 63, minibatch 139/139, validation error 0.628571 %
training @ iter =  8800
epoch 64, minibatch 139/139, validation error 0.628571 %
training @ iter =  8900
training @ iter =  9000
epoch 65, minibatch 139/139, validation error 0.628571 %
training @ iter =  9100
epoch 66, minibatch 139/139, validation error 0.621429 %
training @ iter =  9200
training @ iter =  9300
epoch 67, minibatch 139/139, validation error 0.621429 %
training @ iter =  9400
epoch 68, minibatch 139/139, validation error 0.621429 %
training @ iter =  9500
epoch 69, minibatch 139/139, validation error 0.621429 %
training @ iter =  9600
training @ iter =  9700
epoch 70, minibatch 139/139, validation error 0.621429 %
training @ iter =  9800
epoch 71, minibatch 139/139, validation error 0.621429 %
training @ iter =  9900
training @ iter =  10000
epoch 72, minibatch 139/139, validation error 0.621429 %
training @ iter =  10100
epoch 73, minibatch 139/139, validation error 0.621429 %
training @ iter =  10200
epoch 74, minibatch 139/139, validation error 0.621429 %
training @ iter =  10300
training @ iter =  10400
epoch 75, minibatch 139/139, validation error 0.614286 %
     epoch 75, minibatch 139/139, test error of best model 89.975000 %
training @ iter =  10500
epoch 76, minibatch 139/139, validation error 0.614286 %
training @ iter =  10600
training @ iter =  10700
epoch 77, minibatch 139/139, validation error 0.607143 %
     epoch 77, minibatch 139/139, test error of best model 89.978571 %
training @ iter =  10800
epoch 78, minibatch 139/139, validation error 0.607143 %
training @ iter =  10900
epoch 79, minibatch 139/139, validation error 0.592857 %
     epoch 79, minibatch 139/139, test error of best model 89.978571 %
training @ iter =  11000
training @ iter =  11100
epoch 80, minibatch 139/139, validation error 0.585714 %
     epoch 80, minibatch 139/139, test error of best model 89.978571 %
training @ iter =  11200
epoch 81, minibatch 139/139, validation error 0.585714 %
training @ iter =  11300
epoch 82, minibatch 139/139, validation error 0.585714 %
training @ iter =  11400
training @ iter =  11500
epoch 83, minibatch 139/139, validation error 0.585714 %
training @ iter =  11600
epoch 84, minibatch 139/139, validation error 0.585714 %
training @ iter =  11700
training @ iter =  11800
epoch 85, minibatch 139/139, validation error 0.578571 %
     epoch 85, minibatch 139/139, test error of best model 89.971429 %
training @ iter =  11900
epoch 86, minibatch 139/139, validation error 0.571429 %
     epoch 86, minibatch 139/139, test error of best model 89.971429 %
training @ iter =  12000
epoch 87, minibatch 139/139, validation error 0.571429 %
training @ iter =  12100
training @ iter =  12200
epoch 88, minibatch 139/139, validation error 0.564286 %
     epoch 88, minibatch 139/139, test error of best model 89.971429 %
training @ iter =  12300
epoch 89, minibatch 139/139, validation error 0.557143 %
     epoch 89, minibatch 139/139, test error of best model 89.971429 %
training @ iter =  12400
training @ iter =  12500
epoch 90, minibatch 139/139, validation error 0.557143 %
training @ iter =  12600
epoch 91, minibatch 139/139, validation error 0.557143 %
training @ iter =  12700
epoch 92, minibatch 139/139, validation error 0.557143 %
training @ iter =  12800
training @ iter =  12900
epoch 93, minibatch 139/139, validation error 0.557143 %
training @ iter =  13000
epoch 94, minibatch 139/139, validation error 0.564286 %
training @ iter =  13100
training @ iter =  13200
epoch 95, minibatch 139/139, validation error 0.557143 %
training @ iter =  13300
epoch 96, minibatch 139/139, validation error 0.557143 %
training @ iter =  13400
epoch 97, minibatch 139/139, validation error 0.557143 %
training @ iter =  13500
training @ iter =  13600
epoch 98, minibatch 139/139, validation error 0.557143 %
training @ iter =  13700
epoch 99, minibatch 139/139, validation error 0.557143 %
training @ iter =  13800
epoch 100, minibatch 139/139, validation error 0.557143 %
training @ iter =  13900
training @ iter =  14000
epoch 101, minibatch 139/139, validation error 0.557143 %
training @ iter =  14100
epoch 102, minibatch 139/139, validation error 0.557143 %
training @ iter =  14200
training @ iter =  14300
epoch 103, minibatch 139/139, validation error 0.557143 %
training @ iter =  14400
epoch 104, minibatch 139/139, validation error 0.557143 %
training @ iter =  14500
epoch 105, minibatch 139/139, validation error 0.557143 %
training @ iter =  14600
training @ iter =  14700
epoch 106, minibatch 139/139, validation error 0.528571 %
     epoch 106, minibatch 139/139, test error of best model 89.960714 %
training @ iter =  14800
epoch 107, minibatch 139/139, validation error 0.528571 %
training @ iter =  14900
training @ iter =  15000
epoch 108, minibatch 139/139, validation error 0.528571 %
training @ iter =  15100
epoch 109, minibatch 139/139, validation error 0.528571 %
training @ iter =  15200
epoch 110, minibatch 139/139, validation error 0.528571 %
training @ iter =  15300
training @ iter =  15400
epoch 111, minibatch 139/139, validation error 0.528571 %
training @ iter =  15500
epoch 112, minibatch 139/139, validation error 0.528571 %
training @ iter =  15600
training @ iter =  15700
epoch 113, minibatch 139/139, validation error 0.521429 %
     epoch 113, minibatch 139/139, test error of best model 89.960714 %
training @ iter =  15800
epoch 114, minibatch 139/139, validation error 0.521429 %
training @ iter =  15900
epoch 115, minibatch 139/139, validation error 0.521429 %
training @ iter =  16000
training @ iter =  16100
epoch 116, minibatch 139/139, validation error 0.514286 %
     epoch 116, minibatch 139/139, test error of best model 89.964286 %
training @ iter =  16200
epoch 117, minibatch 139/139, validation error 0.514286 %
training @ iter =  16300
training @ iter =  16400
epoch 118, minibatch 139/139, validation error 0.514286 %
training @ iter =  16500
epoch 119, minibatch 139/139, validation error 0.514286 %
training @ iter =  16600
epoch 120, minibatch 139/139, validation error 0.514286 %
training @ iter =  16700
training @ iter =  16800
epoch 121, minibatch 139/139, validation error 0.514286 %
training @ iter =  16900
epoch 122, minibatch 139/139, validation error 0.514286 %
training @ iter =  17000
epoch 123, minibatch 139/139, validation error 0.514286 %
training @ iter =  17100
training @ iter =  17200
epoch 124, minibatch 139/139, validation error 0.507143 %
     epoch 124, minibatch 139/139, test error of best model 89.964286 %
training @ iter =  17300
epoch 125, minibatch 139/139, validation error 0.507143 %
training @ iter =  17400
training @ iter =  17500
epoch 126, minibatch 139/139, validation error 0.507143 %
training @ iter =  17600
epoch 127, minibatch 139/139, validation error 0.507143 %
training @ iter =  17700
epoch 128, minibatch 139/139, validation error 0.507143 %
training @ iter =  17800
training @ iter =  17900
epoch 129, minibatch 139/139, validation error 0.507143 %
training @ iter =  18000
epoch 130, minibatch 139/139, validation error 0.507143 %
training @ iter =  18100
training @ iter =  18200
epoch 131, minibatch 139/139, validation error 0.507143 %
training @ iter =  18300
epoch 132, minibatch 139/139, validation error 0.507143 %
training @ iter =  18400
epoch 133, minibatch 139/139, validation error 0.507143 %
training @ iter =  18500
training @ iter =  18600
epoch 134, minibatch 139/139, validation error 0.507143 %
training @ iter =  18700
epoch 135, minibatch 139/139, validation error 0.507143 %
training @ iter =  18800
training @ iter =  18900
epoch 136, minibatch 139/139, validation error 0.507143 %
training @ iter =  19000
epoch 137, minibatch 139/139, validation error 0.507143 %
training @ iter =  19100
epoch 138, minibatch 139/139, validation error 0.500000 %
     epoch 138, minibatch 139/139, test error of best model 89.960714 %
training @ iter =  19200
training @ iter =  19300
epoch 139, minibatch 139/139, validation error 0.500000 %
training @ iter =  19400
epoch 140, minibatch 139/139, validation error 0.500000 %
training @ iter =  19500
epoch 141, minibatch 139/139, validation error 0.500000 %
training @ iter =  19600
training @ iter =  19700
epoch 142, minibatch 139/139, validation error 0.507143 %
training @ iter =  19800
epoch 143, minibatch 139/139, validation error 0.507143 %
training @ iter =  19900
training @ iter =  20000
epoch 144, minibatch 139/139, validation error 0.500000 %
training @ iter =  20100
epoch 145, minibatch 139/139, validation error 0.500000 %
training @ iter =  20200
epoch 146, minibatch 139/139, validation error 0.500000 %
training @ iter =  20300
training @ iter =  20400
epoch 147, minibatch 139/139, validation error 0.500000 %
training @ iter =  20500
epoch 148, minibatch 139/139, validation error 0.500000 %
training @ iter =  20600
training @ iter =  20700
epoch 149, minibatch 139/139, validation error 0.500000 %
training @ iter =  20800
epoch 150, minibatch 139/139, validation error 0.500000 %
training @ iter =  20900
epoch 151, minibatch 139/139, validation error 0.500000 %
training @ iter =  21000
training @ iter =  21100
epoch 152, minibatch 139/139, validation error 0.500000 %
training @ iter =  21200
epoch 153, minibatch 139/139, validation error 0.500000 %
training @ iter =  21300
training @ iter =  21400
epoch 154, minibatch 139/139, validation error 0.500000 %
training @ iter =  21500
epoch 155, minibatch 139/139, validation error 0.500000 %
training @ iter =  21600
epoch 156, minibatch 139/139, validation error 0.500000 %
training @ iter =  21700
training @ iter =  21800
epoch 157, minibatch 139/139, validation error 0.500000 %
training @ iter =  21900
epoch 158, minibatch 139/139, validation error 0.500000 %
training @ iter =  22000
training @ iter =  22100
epoch 159, minibatch 139/139, validation error 0.500000 %
training @ iter =  22200
epoch 160, minibatch 139/139, validation error 0.500000 %
training @ iter =  22300
epoch 161, minibatch 139/139, validation error 0.500000 %
training @ iter =  22400
training @ iter =  22500
epoch 162, minibatch 139/139, validation error 0.500000 %
training @ iter =  22600
epoch 163, minibatch 139/139, validation error 0.500000 %
training @ iter =  22700
epoch 164, minibatch 139/139, validation error 0.500000 %
training @ iter =  22800
training @ iter =  22900
epoch 165, minibatch 139/139, validation error 0.500000 %
training @ iter =  23000
epoch 166, minibatch 139/139, validation error 0.492857 %
     epoch 166, minibatch 139/139, test error of best model 89.957143 %
training @ iter =  23100
training @ iter =  23200
epoch 167, minibatch 139/139, validation error 0.492857 %
training @ iter =  23300
epoch 168, minibatch 139/139, validation error 0.492857 %
training @ iter =  23400
epoch 169, minibatch 139/139, validation error 0.492857 %
training @ iter =  23500
training @ iter =  23600
epoch 170, minibatch 139/139, validation error 0.492857 %
training @ iter =  23700
epoch 171, minibatch 139/139, validation error 0.492857 %
training @ iter =  23800
training @ iter =  23900
epoch 172, minibatch 139/139, validation error 0.492857 %
training @ iter =  24000
epoch 173, minibatch 139/139, validation error 0.492857 %
training @ iter =  24100
epoch 174, minibatch 139/139, validation error 0.492857 %
training @ iter =  24200
training @ iter =  24300
epoch 175, minibatch 139/139, validation error 0.485714 %
     epoch 175, minibatch 139/139, test error of best model 89.957143 %
training @ iter =  24400
epoch 176, minibatch 139/139, validation error 0.485714 %
training @ iter =  24500
training @ iter =  24600
epoch 177, minibatch 139/139, validation error 0.485714 %
training @ iter =  24700
epoch 178, minibatch 139/139, validation error 0.485714 %
training @ iter =  24800
epoch 179, minibatch 139/139, validation error 0.485714 %
training @ iter =  24900
training @ iter =  25000
epoch 180, minibatch 139/139, validation error 0.485714 %
training @ iter =  25100
epoch 181, minibatch 139/139, validation error 0.485714 %
training @ iter =  25200
epoch 182, minibatch 139/139, validation error 0.485714 %
training @ iter =  25300
training @ iter =  25400
epoch 183, minibatch 139/139, validation error 0.478571 %
     epoch 183, minibatch 139/139, test error of best model 89.960714 %
training @ iter =  25500
epoch 184, minibatch 139/139, validation error 0.478571 %
training @ iter =  25600
training @ iter =  25700
epoch 185, minibatch 139/139, validation error 0.478571 %
training @ iter =  25800
epoch 186, minibatch 139/139, validation error 0.478571 %
training @ iter =  25900
epoch 187, minibatch 139/139, validation error 0.478571 %
training @ iter =  26000
training @ iter =  26100
epoch 188, minibatch 139/139, validation error 0.478571 %
training @ iter =  26200
epoch 189, minibatch 139/139, validation error 0.471429 %
     epoch 189, minibatch 139/139, test error of best model 89.960714 %
training @ iter =  26300
training @ iter =  26400
epoch 190, minibatch 139/139, validation error 0.471429 %
training @ iter =  26500
epoch 191, minibatch 139/139, validation error 0.471429 %
training @ iter =  26600
epoch 192, minibatch 139/139, validation error 0.471429 %
training @ iter =  26700
training @ iter =  26800
epoch 193, minibatch 139/139, validation error 0.471429 %
training @ iter =  26900
epoch 194, minibatch 139/139, validation error 0.471429 %
training @ iter =  27000
training @ iter =  27100
epoch 195, minibatch 139/139, validation error 0.471429 %
training @ iter =  27200
epoch 196, minibatch 139/139, validation error 0.471429 %
training @ iter =  27300
epoch 197, minibatch 139/139, validation error 0.471429 %
training @ iter =  27400
training @ iter =  27500
epoch 198, minibatch 139/139, validation error 0.471429 %
training @ iter =  27600
epoch 199, minibatch 139/139, validation error 0.471429 %
training @ iter =  27700
epoch 200, minibatch 139/139, validation error 0.471429 %
Optimization complete.
Best validation score of 0.471429 % obtained at iteration 26271, with test performance 89.960714 %
The code for file kaggle.pkl.gz ran for 57.85m

The Kaggle test set is unlabeled, so I initialized all of its labels to zero; the test-error numbers reported above are therefore meaningless. I labeled the first 36 images by eye, and we can expect about 10% of the model's predictions on the rest to be the digit 0, spuriously matching the zero placeholders, so the reported test-set error ratio should hover around 0.8988, in other words about 90% error. We hope the validation error is a better guide to the ongoing results of the model training; only the final submission to Kaggle will tell us for sure.
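
A quick back-of-the-envelope check of that 0.8988 figure, assuming the 28,000-image Kaggle test set and that the model classifies the 36 hand-labeled images correctly:

n_test = 28000                                # size of the Kaggle test set
expected_correct = 36 + 0.10 * (n_test - 36)  # hand-labeled hits + chance zero matches
print 1.0 - expected_correct / n_test         # ~0.8988, the ~90% error seen above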


In [7]:
import numpy as np
def predict():
    """
    get the predicted labels
    """

    # load the saved per-batch test-set predictions
    # (a list of arrays, not a model object, despite the file name)
    test_preds = cPickle.load(open('best_cnn_pred_model.pkl', 'rb'))

    return np.hstack([test_preds[i] for i in range(len(test_preds))])

In [8]:
predictions = predict()

In [9]:
datasets = load_data(dataset)
test_set_x, test_set_y = datasets[2]
test_set_x = test_set_x.get_value()


... loading data

In [10]:
print("First 36 actual  {}".format(test_set_y.eval()[:36]))
print("Predicted values {}".format(predictions[:36]))
print("difference       {}".format(predictions[:36]-test_set_y.eval()[:36]))


First 36 actual  [2 0 9 0 3 7 0 3 0 3 5 7 4 0 4 3 3 1 9 0 9 1 1 5 7 4 2 7 4 7 7 5 4 2 6 2]
Predicted values [2 0 9 0 3 7 0 3 0 3 5 7 4 0 4 3 3 1 9 0 9 1 1 5 7 4 2 7 4 7 7 5 4 2 6 2]
difference       [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]

Save the Kaggle submission file


In [12]:
with open('submission_theano_convolutional_MLP.csv', 'w') as f_result:
    f_result.write('"ImageId","Label"\n')
    for i, y in enumerate(predictions, 1):
        f_result.write('{},"{}"\n'.format(i,y))
